import numpy as np
import pandas as pd
from PIL import ImageFile
from tqdm import tqdm
import cv2
import matplotlib.pylab as plt
from matplotlib import cm
%matplotlib inline
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import image as keras_image
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Activation, Flatten, Dropout, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.layers import PReLU, LeakyReLU
import seaborn as sns
def image_to_tensor(img_path, base_dir=r"C:\Users\PRABHAT\Desktop\PRESENT\New folder"):
    """Load one image file as a 4-D tensor of shape (1, 128, 128, 3).

    Args:
        img_path: image file name relative to ``base_dir``. The CSV values
            begin with a backslash, so plain string concatenation (not
            os.path.join) is used to preserve the original behaviour.
        base_dir: folder holding the images. Defaults to the original
            hard-coded location so existing calls keep working.

    Returns:
        numpy.ndarray of shape (1, 128, 128, 3), ready for np.vstack.
    """
    # target_size resizes every image to a fixed 128x128 resolution.
    img = keras_image.load_img(base_dir + img_path, target_size=(128, 128))
    x = keras_image.img_to_array(img)
    # axis=0 adds a leading batch dimension so the tensors can be stacked.
    return np.expand_dims(x, axis=0)
def data_to_tensor(img_paths):
    """Convert an iterable of image paths into one stacked array.

    Each path is turned into a (1, 128, 128, 3) tensor by
    image_to_tensor; the tensors are then stacked vertically into a
    single (n, 128, 128, 3) array. tqdm wraps the iteration to show a
    progress bar while the images are processed.
    """
    tensor_list = []
    for path in tqdm(img_paths):
        tensor_list.append(image_to_tensor(path))
    return np.vstack(tensor_list)
# tqdm is used to add a progress bar so we can keep track of the progress of the data processing
The data_to_tensor function takes a list of image file paths as input, and returns a NumPy array that contains a tensor for each image in the input list. The function calls the image_to_tensor function on each input image file path using a list comprehension, and then uses the np.vstack function to stack the resulting tensors vertically into a single NumPy array.
The resulting NumPy array represents the input data for a deep learning model, where each row of the array corresponds to a single input example, and the columns of the array correspond to the features or dimensions of each example. In the case of image data, the columns of the array correspond to the pixel values of the images.
NOTE — image_to_tensor converts each image into an array (tensor) of the fixed size 128x128, and data_to_tensor then stacks those arrays into a single array.
# Allow PIL to load images whose files are truncated instead of raising an error.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Load the data
data = pd.read_csv(r"C:\Users\PRABHAT\Desktop\PRESENT\Flower_samples.csv")
files = data['file']            # image file names (each begins with a backslash — see data.head())
targets = data['label'].values  # integer class labels
tensors = data_to_tensor(files);
# It is good practice to keep the inputs (files) and outputs (targets) separate
100%|███████████████████████████████████████████████████████████████████████████████| 304/304 [00:00<00:00, 535.25it/s]
data.head() # displays first five rows of the dataset
| file | label | name | |
|---|---|---|---|
| 0 | \8223968_6b51555d2f_n.jpg | 1 | Dandelions |
| 1 | \2067882323_8de6623ffd.jpg | 3 | Sunflower |
| 2 | \110472418_87b6a3aa98_m.jpg | 2 | Rose |
| 3 | \295257304_de893fc94d.jpg | 2 | Rose |
| 4 | \142235017_07816937c6.jpg | 4 | Tulip |
tensors.shape
(304, 128, 128, 3)
# Class names indexed by integer label (0=Daisy ... 4=Tulip).
names = ['Daisy','Dandelions','Rose','Sunflower','Tulip']
def display_images(img_path, ax, base_dir=r"C:\Users\PRABHAT\Desktop\PRESENT\New folder"):
    """Read an image with OpenCV and draw it on the given matplotlib axes.

    Args:
        img_path: image file name relative to ``base_dir`` (the CSV values
            begin with a backslash, so plain concatenation is used).
        ax: matplotlib axes to draw on.
        base_dir: folder holding the images; defaults to the original
            hard-coded location so existing calls keep working.

    Raises:
        FileNotFoundError: if the image cannot be read. cv2.imread
            returns None instead of raising, which would otherwise make
            cvtColor fail with a cryptic error.
    """
    img = cv2.imread(base_dir + img_path)
    if img is None:
        raise FileNotFoundError("Could not read image: " + base_dir + img_path)
    # OpenCV loads images in BGR channel order; matplotlib expects RGB.
    ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
# Show four sample images (dataset rows 10-13) with their class names.
fig = plt.figure(figsize=(10,10))  # 10 x 10 inches
for i in range(4):  # Only 4 images
    # 2-row x 4-column grid; i+1 selects the subplot position; the empty
    # tick lists hide the axis markings.
    ax = fig.add_subplot(2, 4, i + 1, xticks=[], yticks=[])
    ax.set_title(names[targets[i+10]], color='r')  # class name as a red title
    display_images(files[i+10], ax)
# Create a csv file and save data
# Flatten each (128, 128, 3) image into a single row of pixel values.
images_csv = tensors.reshape(304,128*128*3)
np.savetxt("flower_images.csv", images_csv, fmt='%i', delimiter=",")
# Here we are saving a 2-D array of 304 rows and
# 128*128*3 = 49152 columns
# fmt='%i' -- format each value as an integer
# Read the pandas dataframe from csv
# header=None: the file has no header row, so columns get integer names.
data_images = pd.read_csv("flower_images.csv", header=None)
data_images.head()
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ... | 49142 | 49143 | 49144 | 49145 | 49146 | 49147 | 49148 | 49149 | 49150 | 49151 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 9 | 37 | 134 | 8 | 41 | 136 | 5 | 39 | 136 | 8 | ... | 22 | 76 | 57 | 0 | 72 | 39 | 20 | 30 | 19 | 0 |
| 1 | 144 | 171 | 200 | 119 | 157 | 196 | 134 | 160 | 195 | 143 | ... | 0 | 5 | 4 | 0 | 1 | 3 | 0 | 13 | 17 | 3 |
| 2 | 148 | 0 | 3 | 161 | 3 | 0 | 169 | 1 | 0 | 172 | ... | 10 | 202 | 0 | 3 | 191 | 2 | 0 | 234 | 0 | 2 |
| 3 | 32 | 58 | 23 | 33 | 48 | 15 | 21 | 34 | 6 | 5 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 4 | 255 | 205 | 204 | 235 | 97 | 97 | 214 | 40 | 29 | 77 | ... | 39 | 248 | 47 | 40 | 249 | 58 | 50 | 245 | 110 | 116 |
5 rows × 49152 columns
data_images.iloc[:10, :10]  # First 10 rows and columns (integer pixel values)
| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | |
|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 9 | 37 | 134 | 8 | 41 | 136 | 5 | 39 | 136 | 8 |
| 1 | 144 | 171 | 200 | 119 | 157 | 196 | 134 | 160 | 195 | 143 |
| 2 | 148 | 0 | 3 | 161 | 3 | 0 | 169 | 1 | 0 | 172 |
| 3 | 32 | 58 | 23 | 33 | 48 | 15 | 21 | 34 | 6 | 5 |
| 4 | 255 | 205 | 204 | 235 | 97 | 97 | 214 | 40 | 29 | 77 |
| 5 | 229 | 241 | 253 | 236 | 247 | 253 | 238 | 247 | 252 | 242 |
| 6 | 81 | 88 | 46 | 4 | 7 | 0 | 36 | 40 | 26 | 23 |
| 7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 8 | 151 | 171 | 182 | 152 | 172 | 183 | 152 | 172 | 183 | 153 |
| 9 | 126 | 100 | 77 | 128 | 102 | 77 | 127 | 103 | 77 | 125 |
data_images.shape
(304, 49152)
# Read image tensors from the dataframe
tensors = data_images.values  # NumPy array of shape (n_samples, 49152)
tensors.shape
(304, 49152)
# Restore the original image layout: (n_samples, height, width, channels).
tensors = tensors.reshape(-1,128,128,3)
tensors.shape
(304, 128, 128, 3)
tensors = tensors.astype('float32')/255
# Converts the array to float32 and divides every pixel value by 255,
# rescaling the data from [0, 255] to [0, 1] for training.
Normalization is a common preprocessing step in deep learning that rescales the input features to be in a similar range. Normalizing pixel values to lie between 0 and 1 is important because it helps the optimizer to converge faster and ensures that each feature contributes equally to the training process.
Normalization of tensors refers to the process of scaling the pixel values of an image to a common range, typically between 0 and 1 or -1 and 1. This is done to standardize the data so that the model can process it more effectively. Normalization is necessary because images can have varying pixel ranges depending on factors such as image resolution, brightness, and contrast. By normalizing the pixel values, the model can better understand the relationships between the pixels and more accurately learn patterns in the data.
there is no loss of data in the process of normalization. In fact, normalization is a technique used to scale the data between 0 and 1 without losing any information.
When converting image data to floating-point values, the pixel values are typically in the range of 0 to 255, with 0 representing black and 255 representing white. Dividing each pixel value by 255 scales the values down to the range of 0 to 1, which is a common range for neural network inputs.
One-hot encode the targets One-hot encoding is a technique used to represent categorical data in a format that is suitable for machine learning algorithms. It involves representing each category as a binary vector, where each element in the vector corresponds to a unique category. The element corresponding to the category is set to 1, while all other elements are set to 0.
For example, suppose we have a set of categories: {apple, banana, cherry}. In one-hot encoding, we represent each category as a vector as follows:
apple = [1, 0, 0] banana = [0, 1, 0] cherry = [0, 0, 1]
Converting target values to one-hot encoding is necessary when the target variable has categorical values, especially when building a multi-class classification model. In one-hot encoding, a column is created for each category value in the target variable, and the column for the respective category is marked as 1, while all other columns are marked as 0 for each sample. This representation helps the neural network to understand that the output is categorical and not continuous. In the case of multi-class classification, the output layer of the neural network has multiple neurons, each representing a class. The neuron with the highest value is considered as the predicted class for the input image.
# One-hot encode the integer labels into 5-dimensional binary vectors.
targets = to_categorical(targets, 5)
20% of the data is used for testing and the rest for training.
Letting a specific value for the random_state parameter ensures that the random splitting of the data into training and testing sets will be the same every time the code is run. This is important for reproducibility and also allows for comparison of model performance across different runs.
In the context of machine learning, the validation set is a portion of the data that is used to evaluate the performance of a trained model.
When training a machine learning model, we split the data into training and testing sets.
The model is trained on the training set and its performance is evaluated on the testing set. However, during the training
process, we may adjust the model based on the performance on the testing set. This can lead to overfitting, which means the
model may perform well on the testing set but poorly on new data.
To avoid overfitting, we can use a validation set. The validation set is another portion of the data that is not used in
training the model but is used to tune the model's hyperparameters and assess its generalization performance. By evaluating the
model on the validation set, we can choose the best model that performs well on both the training and validation sets. This can
help us to build a more robust and accurate machine learning model.
# Hold out 20% of the samples; a fixed random_state makes the shuffle
# reproducible across runs.
x_train, x_test, y_train, y_test = train_test_split(tensors, targets,
                                                    test_size = 0.2,
                                                    random_state = 1)
# Splitting the testing set into validation and testing sets, half each
n = int(len(x_test)/2)
x_valid, y_valid = x_test[:n], y_test[:n]
x_test, y_test = x_test[n:], y_test[n:]
x_train.shape, y_train.shape
((243, 128, 128, 3), (243, 5))
x_test.shape, y_test.shape
((31, 128, 128, 3), (31, 5))
x_valid.shape, y_valid.shape
((30, 128, 128, 3), (30, 5))
# Read and display a tensor
print('Label: ', names[np.argmax(y_train[7])])  # argmax undoes the one-hot encoding
plt.figure(figsize=(3,3))
plt.imshow((x_train[7]));
Label: Sunflower
Sequential models are linear stacks of 'layers' where one layer leads to the next, i.e., the output of the previous layer is the input to the next layer.
model.add(Conv2D(128, (3, 3), input_shape=x_train.shape[1:])) — adds a 2D convolution layer with 128 filters, each a 3x3 matrix (in pixels). These filters are used to extract features such as edges and corners. These low-level features can then be combined into more complex features to distinguish between classes of images. input_shape=x_train.shape[1:] gives the shape of each input sample, which is important for the first layer. NOTE — we could also use 32, 64 or 256 filters and a (5,5) or (7,7) kernel size (trial and error).
LeakyReLU - is a type of activation function used in case of CNN to avoid the vanishing gradient problem.
After using the 3x3 filters for convolution we need to do max pooling so that we get "dimensionality reduction". In max pooling we define a pool (filter) of a particular size (say 2x2); then for every 2x2 region the maximum value is selected for the next layer. (Average pooling is an alternative.) We use a 2D max pooling layer because our convolution layer is also a 2D layer (Keras also supports 1D and 3D max pooling layers). We are not using zero padding since we are not preserving the spatial resolution.
Dropout - 25% of the input units to the Dropout layer will be randomly set to zero at each iteration, which helps to prevent overfitting by reducing the interdependence of the neurons and forcing the model to learn more robust features.
model.add(Conv2D(128, (3, 3))) - layer in the model to further extract more abstract features from the input images. The second Conv2D layer applies another set of filters to the output of the first MaxPooling2D layer, which can help to extract more complex and high-level features from the input images.
model.add(GlobalMaxPooling2D()) - is a global operation that extracts the most important feature from each feature map of the output tensor.Global pooling layers can be used in a variety of cases. Primarily, it can be used to reduce the dimensionality of the feature maps output by some convolutional layer, to replace Flattening and sometimes even Dense layers in your classifier.
Dense - dense layer is used to transform the features extracted by convolutional layers into a format that can be used for classification or regression. Dense layers perform a matrix multiplication operation on the input features, followed by an activation function, to produce a set of output values. Dense layers are often used in neural networks for tasks that require mapping input to output in a non-linear way, such as image classification 512 or 32 neurons.
The compile method is used to configure the learning process of a model. It requires the user to specify a loss function, an optimizer, and the evaluation metrics that are used to judge the performance of the model during training and testing. The choice of evaluation metrics depends on the specific problem and the goals of the model. The choice of evaluation metric should be based on the problem at hand and the specific goals of the model. For instance, accuracy might be a good choice if the classes are well-balanced and the goal is to have a high overall accuracy, while precision and recall might be more appropriate if there is class imbalance and the goal is to correctly classify the positive cases.
def model(input_shape=None):
    """Build and compile the 5-class flower CNN.

    Two Conv2D/LeakyReLU/MaxPooling/Dropout stages extract features,
    GlobalMaxPooling2D collapses each feature map to a single value,
    and a Dense head performs the softmax classification.

    Args:
        input_shape: shape of one input sample, e.g. (128, 128, 3).
            Defaults to x_train.shape[1:] to preserve the original
            notebook behaviour.

    Returns:
        A compiled Keras Sequential model.
    """
    if input_shape is None:
        input_shape = x_train.shape[1:]
    model = Sequential()  # initialise the empty sequential model
    model.add(Conv2D(128, (3, 3), input_shape=input_shape))
    model.add(LeakyReLU(alpha=0.02))  # avoids the vanishing-gradient problem
    model.add(MaxPooling2D(pool_size=(2, 2)))  # halve the spatial resolution
    model.add(Dropout(0.25))  # regularization against overfitting
    model.add(Conv2D(128, (3, 3)))  # extract more abstract features
    model.add(LeakyReLU(alpha=0.02))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # FINAL LAYERS FOR CLASSIFICATION
    model.add(GlobalMaxPooling2D())  # one max per feature map -> 128-dim vector
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.02))
    model.add(Dropout(0.5))
    model.add(Dense(5))  # output layer: one neuron per flower class
    model.add(Activation('softmax'))  # class probabilities
    # Categorical cross-entropy matches the one-hot encoded targets.
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# NOTE: this rebinds the name `model` from the builder function to the
# built Keras model instance, so the builder cannot be called again afterwards.
model = model()
In Keras, callbacks are functions that can be applied at certain stages of the training process. They are passed to the fit() method using the callbacks argument, and allow you to monitor and modify the behavior of the model during training.
Some common examples of callback functions are:
ModelCheckpoint: Saves the model weights after every epoch or when the validation loss improves. EarlyStopping: Stops the training process if the validation loss stops improving after a certain number of epochs. ReduceLROnPlateau: Reduces the learning rate if the validation loss stops improving after a certain number of epochs. Using callbacks can help you to achieve better performance and avoid overfitting in your models.
# To save the best model (monitors val_loss by default; only overwrites
# the file when the monitored value improves)
checkpointer = ModelCheckpoint(filepath='weights.best.model.hdf5',verbose=2, save_best_only=True)
# Earlystopping: halt training if val_loss has not improved for 5 epochs
early_stop = EarlyStopping(monitor='val_loss', patience=5)
# To reduce learning rate dynamically: multiply lr by factor=0.2 after
# 5 epochs without val_loss improvement
lr_reduction = ReduceLROnPlateau(monitor='val_loss',patience=5, verbose=2, factor=0.2)
model.summary()  # print layer shapes and parameter counts
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 126, 126, 128) 3584
leaky_re_lu (LeakyReLU) (None, 126, 126, 128) 0
max_pooling2d (MaxPooling2D (None, 63, 63, 128) 0
)
dropout (Dropout) (None, 63, 63, 128) 0
conv2d_1 (Conv2D) (None, 61, 61, 128) 147584
leaky_re_lu_1 (LeakyReLU) (None, 61, 61, 128) 0
max_pooling2d_1 (MaxPooling (None, 30, 30, 128) 0
2D)
dropout_1 (Dropout) (None, 30, 30, 128) 0
global_max_pooling2d (Globa (None, 128) 0
lMaxPooling2D)
dense (Dense) (None, 512) 66048
leaky_re_lu_2 (LeakyReLU) (None, 512) 0
dropout_2 (Dropout) (None, 512) 0
dense_1 (Dense) (None, 5) 2565
activation (Activation) (None, 5) 0
=================================================================
Total params: 219,781
Trainable params: 219,781
Non-trainable params: 0
_________________________________________________________________
# Train for up to 75 epochs; the callbacks checkpoint the best weights,
# shrink the learning rate on plateaus, and stop early if val_loss stalls.
history = model.fit(x_train, y_train,
                    epochs=75, batch_size=32, verbose=2,
                    validation_data=(x_valid, y_valid),
                    callbacks=[checkpointer, lr_reduction ,early_stop])
Epoch 1/75 Epoch 1: val_loss improved from inf to 1.59720, saving model to weights.best.model.hdf5 8/8 - 21s - loss: 1.6225 - accuracy: 0.2428 - val_loss: 1.5972 - val_accuracy: 0.3000 - lr: 0.0010 - 21s/epoch - 3s/step Epoch 2/75 Epoch 2: val_loss improved from 1.59720 to 1.56938, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 1.5922 - accuracy: 0.2675 - val_loss: 1.5694 - val_accuracy: 0.3333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 3/75 Epoch 3: val_loss improved from 1.56938 to 1.52717, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 1.5413 - accuracy: 0.3251 - val_loss: 1.5272 - val_accuracy: 0.3333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 4/75 Epoch 4: val_loss improved from 1.52717 to 1.46229, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 1.4699 - accuracy: 0.3498 - val_loss: 1.4623 - val_accuracy: 0.3333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 5/75 Epoch 5: val_loss improved from 1.46229 to 1.39708, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 1.3801 - accuracy: 0.4403 - val_loss: 1.3971 - val_accuracy: 0.5333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 6/75 Epoch 6: val_loss improved from 1.39708 to 1.26994, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 1.2584 - accuracy: 0.4938 - val_loss: 1.2699 - val_accuracy: 0.4667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 7/75 Epoch 7: val_loss improved from 1.26994 to 1.22469, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 1.1444 - accuracy: 0.5309 - val_loss: 1.2247 - val_accuracy: 0.4333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 8/75 Epoch 8: val_loss improved from 1.22469 to 1.07210, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 1.0373 - accuracy: 0.5556 - val_loss: 1.0721 - val_accuracy: 0.5667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 9/75 Epoch 9: val_loss did not improve from 1.07210 8/8 - 14s - loss: 1.0106 - accuracy: 0.5432 - val_loss: 1.0836 - val_accuracy: 0.6667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 10/75 Epoch 10: val_loss 
improved from 1.07210 to 1.04970, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.9558 - accuracy: 0.6214 - val_loss: 1.0497 - val_accuracy: 0.6667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 11/75 Epoch 11: val_loss did not improve from 1.04970 8/8 - 14s - loss: 0.9427 - accuracy: 0.5926 - val_loss: 1.0740 - val_accuracy: 0.6333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 12/75 Epoch 12: val_loss improved from 1.04970 to 0.99015, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.8451 - accuracy: 0.6790 - val_loss: 0.9901 - val_accuracy: 0.7000 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 13/75 Epoch 13: val_loss did not improve from 0.99015 8/8 - 14s - loss: 0.8303 - accuracy: 0.6749 - val_loss: 1.0766 - val_accuracy: 0.5000 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 14/75 Epoch 14: val_loss improved from 0.99015 to 0.96134, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.8410 - accuracy: 0.6872 - val_loss: 0.9613 - val_accuracy: 0.7333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 15/75 Epoch 15: val_loss improved from 0.96134 to 0.93701, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.8185 - accuracy: 0.6543 - val_loss: 0.9370 - val_accuracy: 0.7333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 16/75 Epoch 16: val_loss did not improve from 0.93701 8/8 - 14s - loss: 0.8659 - accuracy: 0.6667 - val_loss: 1.0123 - val_accuracy: 0.6333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 17/75 Epoch 17: val_loss improved from 0.93701 to 0.93279, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.8252 - accuracy: 0.6543 - val_loss: 0.9328 - val_accuracy: 0.7333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 18/75 Epoch 18: val_loss improved from 0.93279 to 0.92610, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.7623 - accuracy: 0.7160 - val_loss: 0.9261 - val_accuracy: 0.7333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 19/75 Epoch 19: val_loss did not improve from 0.92610 8/8 - 14s - loss: 0.7210 - accuracy: 0.7325 - val_loss: 0.9481 - 
val_accuracy: 0.6667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 20/75 Epoch 20: val_loss improved from 0.92610 to 0.88635, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.7153 - accuracy: 0.7366 - val_loss: 0.8863 - val_accuracy: 0.7000 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 21/75 Epoch 21: val_loss improved from 0.88635 to 0.86070, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.6701 - accuracy: 0.7572 - val_loss: 0.8607 - val_accuracy: 0.7333 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 22/75 Epoch 22: val_loss did not improve from 0.86070 8/8 - 14s - loss: 0.6697 - accuracy: 0.7160 - val_loss: 0.9386 - val_accuracy: 0.6667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 23/75 Epoch 23: val_loss improved from 0.86070 to 0.78666, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.6271 - accuracy: 0.7737 - val_loss: 0.7867 - val_accuracy: 0.8000 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 24/75 Epoch 24: val_loss did not improve from 0.78666 8/8 - 14s - loss: 0.6286 - accuracy: 0.7778 - val_loss: 0.8685 - val_accuracy: 0.7667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 25/75 Epoch 25: val_loss did not improve from 0.78666 8/8 - 14s - loss: 0.6178 - accuracy: 0.7654 - val_loss: 0.9355 - val_accuracy: 0.6000 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 26/75 Epoch 26: val_loss did not improve from 0.78666 8/8 - 14s - loss: 0.5686 - accuracy: 0.7942 - val_loss: 0.9289 - val_accuracy: 0.6667 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 27/75 Epoch 27: val_loss did not improve from 0.78666 8/8 - 14s - loss: 0.5665 - accuracy: 0.7901 - val_loss: 0.8473 - val_accuracy: 0.7000 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 28/75 Epoch 28: val_loss did not improve from 0.78666 Epoch 28: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 
8/8 - 14s - loss: 0.5360 - accuracy: 0.8148 - val_loss: 0.8405 - val_accuracy: 0.7000 - lr: 0.0010 - 14s/epoch - 2s/step Epoch 29/75 Epoch 29: val_loss did not improve from 0.78666 8/8 - 14s - loss: 0.4816 - accuracy: 0.8436 - val_loss: 0.8123 - val_accuracy: 0.8000 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 30/75 Epoch 30: val_loss did not improve from 0.78666 8/8 - 14s - loss: 0.4461 - accuracy: 0.8601 - val_loss: 0.8383 - val_accuracy: 0.7333 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 31/75 Epoch 31: val_loss improved from 0.78666 to 0.77482, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.4928 - accuracy: 0.8272 - val_loss: 0.7748 - val_accuracy: 0.8000 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 32/75 Epoch 32: val_loss did not improve from 0.77482 8/8 - 14s - loss: 0.4193 - accuracy: 0.8601 - val_loss: 0.7875 - val_accuracy: 0.8000 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 33/75 Epoch 33: val_loss did not improve from 0.77482 8/8 - 14s - loss: 0.4369 - accuracy: 0.8519 - val_loss: 0.7794 - val_accuracy: 0.7333 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 34/75 Epoch 34: val_loss did not improve from 0.77482 8/8 - 14s - loss: 0.4334 - accuracy: 0.8477 - val_loss: 0.7802 - val_accuracy: 0.7333 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 35/75 Epoch 35: val_loss improved from 0.77482 to 0.76533, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.4074 - accuracy: 0.8519 - val_loss: 0.7653 - val_accuracy: 0.7667 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 36/75 Epoch 36: val_loss did not improve from 0.76533 8/8 - 14s - loss: 0.3807 - accuracy: 0.8889 - val_loss: 0.7988 - val_accuracy: 0.7000 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 37/75 Epoch 37: val_loss improved from 0.76533 to 0.75177, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.3964 - accuracy: 0.8807 - val_loss: 0.7518 - val_accuracy: 0.8667 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 38/75 Epoch 38: val_loss did not improve from 0.75177 8/8 - 14s - loss: 
0.4057 - accuracy: 0.8642 - val_loss: 0.7536 - val_accuracy: 0.8333 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 39/75 Epoch 39: val_loss did not improve from 0.75177 8/8 - 14s - loss: 0.3778 - accuracy: 0.8848 - val_loss: 0.7776 - val_accuracy: 0.7333 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 40/75 Epoch 40: val_loss improved from 0.75177 to 0.74690, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.3967 - accuracy: 0.8765 - val_loss: 0.7469 - val_accuracy: 0.8333 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 41/75 Epoch 41: val_loss did not improve from 0.74690 8/8 - 14s - loss: 0.3633 - accuracy: 0.8889 - val_loss: 0.7666 - val_accuracy: 0.8000 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 42/75 Epoch 42: val_loss did not improve from 0.74690 8/8 - 14s - loss: 0.3758 - accuracy: 0.8807 - val_loss: 0.7745 - val_accuracy: 0.7667 - lr: 2.0000e-04 - 14s/epoch - 2s/step Epoch 43/75 Epoch 43: val_loss did not improve from 0.74690 8/8 - 15s - loss: 0.4139 - accuracy: 0.8683 - val_loss: 0.7600 - val_accuracy: 0.8333 - lr: 2.0000e-04 - 15s/epoch - 2s/step Epoch 44/75 Epoch 44: val_loss did not improve from 0.74690 8/8 - 18s - loss: 0.3807 - accuracy: 0.8642 - val_loss: 0.7568 - val_accuracy: 0.8000 - lr: 2.0000e-04 - 18s/epoch - 2s/step Epoch 45/75 Epoch 45: val_loss did not improve from 0.74690 Epoch 45: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
8/8 - 21s - loss: 0.3633 - accuracy: 0.8724 - val_loss: 0.7727 - val_accuracy: 0.7667 - lr: 2.0000e-04 - 21s/epoch - 3s/step Epoch 46/75 Epoch 46: val_loss did not improve from 0.74690 8/8 - 14s - loss: 0.3743 - accuracy: 0.8642 - val_loss: 0.7675 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 47/75 Epoch 47: val_loss did not improve from 0.74690 8/8 - 14s - loss: 0.3672 - accuracy: 0.8642 - val_loss: 0.7547 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 48/75 Epoch 48: val_loss improved from 0.74690 to 0.74610, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.3492 - accuracy: 0.8807 - val_loss: 0.7461 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 49/75 Epoch 49: val_loss did not improve from 0.74610 8/8 - 14s - loss: 0.3473 - accuracy: 0.8848 - val_loss: 0.7505 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 50/75 Epoch 50: val_loss did not improve from 0.74610 8/8 - 14s - loss: 0.3443 - accuracy: 0.8930 - val_loss: 0.7474 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 51/75 Epoch 51: val_loss did not improve from 0.74610 8/8 - 14s - loss: 0.3500 - accuracy: 0.8971 - val_loss: 0.7493 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 52/75 Epoch 52: val_loss did not improve from 0.74610 8/8 - 14s - loss: 0.3438 - accuracy: 0.9012 - val_loss: 0.7468 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 53/75 Epoch 53: val_loss improved from 0.74610 to 0.74576, saving model to weights.best.model.hdf5 8/8 - 14s - loss: 0.3504 - accuracy: 0.9095 - val_loss: 0.7458 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 54/75 Epoch 54: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3366 - accuracy: 0.8889 - val_loss: 0.7471 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 55/75 Epoch 55: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3667 - accuracy: 0.8848 - val_loss: 
0.7469 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 56/75 Epoch 56: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3509 - accuracy: 0.8971 - val_loss: 0.7511 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 57/75 Epoch 57: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3338 - accuracy: 0.9012 - val_loss: 0.7541 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 58/75 Epoch 58: val_loss did not improve from 0.74576 Epoch 58: ReduceLROnPlateau reducing learning rate to 8.000000525498762e-06. 8/8 - 14s - loss: 0.3497 - accuracy: 0.8848 - val_loss: 0.7458 - val_accuracy: 0.8000 - lr: 4.0000e-05 - 14s/epoch - 2s/step Epoch 59/75 Epoch 59: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3430 - accuracy: 0.8765 - val_loss: 0.7471 - val_accuracy: 0.8000 - lr: 8.0000e-06 - 14s/epoch - 2s/step Epoch 60/75 Epoch 60: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3406 - accuracy: 0.8724 - val_loss: 0.7481 - val_accuracy: 0.8000 - lr: 8.0000e-06 - 14s/epoch - 2s/step Epoch 61/75 Epoch 61: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3249 - accuracy: 0.9053 - val_loss: 0.7494 - val_accuracy: 0.8000 - lr: 8.0000e-06 - 14s/epoch - 2s/step Epoch 62/75 Epoch 62: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3468 - accuracy: 0.8724 - val_loss: 0.7496 - val_accuracy: 0.8000 - lr: 8.0000e-06 - 14s/epoch - 2s/step Epoch 63/75 Epoch 63: val_loss did not improve from 0.74576 Epoch 63: ReduceLROnPlateau reducing learning rate to 1.6000001778593287e-06. 
8/8 - 14s - loss: 0.3481 - accuracy: 0.8971 - val_loss: 0.7493 - val_accuracy: 0.8000 - lr: 8.0000e-06 - 14s/epoch - 2s/step Epoch 64/75 Epoch 64: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3499 - accuracy: 0.8807 - val_loss: 0.7494 - val_accuracy: 0.8000 - lr: 1.6000e-06 - 14s/epoch - 2s/step Epoch 65/75 Epoch 65: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3609 - accuracy: 0.8807 - val_loss: 0.7490 - val_accuracy: 0.8000 - lr: 1.6000e-06 - 14s/epoch - 2s/step Epoch 66/75 Epoch 66: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3596 - accuracy: 0.8848 - val_loss: 0.7487 - val_accuracy: 0.8000 - lr: 1.6000e-06 - 14s/epoch - 2s/step Epoch 67/75 Epoch 67: val_loss did not improve from 0.74576 8/8 - 15s - loss: 0.3146 - accuracy: 0.9012 - val_loss: 0.7482 - val_accuracy: 0.8000 - lr: 1.6000e-06 - 15s/epoch - 2s/step Epoch 68/75 Epoch 68: val_loss did not improve from 0.74576 Epoch 68: ReduceLROnPlateau reducing learning rate to 3.200000264769187e-07. 
8/8 - 14s - loss: 0.3254 - accuracy: 0.9053 - val_loss: 0.7482 - val_accuracy: 0.8000 - lr: 1.6000e-06 - 14s/epoch - 2s/step Epoch 69/75 Epoch 69: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3355 - accuracy: 0.8848 - val_loss: 0.7482 - val_accuracy: 0.8000 - lr: 3.2000e-07 - 14s/epoch - 2s/step Epoch 70/75 Epoch 70: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3479 - accuracy: 0.8807 - val_loss: 0.7482 - val_accuracy: 0.8000 - lr: 3.2000e-07 - 14s/epoch - 2s/step Epoch 71/75 Epoch 71: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3263 - accuracy: 0.9095 - val_loss: 0.7482 - val_accuracy: 0.8000 - lr: 3.2000e-07 - 14s/epoch - 2s/step Epoch 72/75 Epoch 72: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3441 - accuracy: 0.8848 - val_loss: 0.7481 - val_accuracy: 0.8000 - lr: 3.2000e-07 - 14s/epoch - 2s/step Epoch 73/75 Epoch 73: val_loss did not improve from 0.74576 Epoch 73: ReduceLROnPlateau reducing learning rate to 6.400000529538374e-08. 8/8 - 14s - loss: 0.3516 - accuracy: 0.8971 - val_loss: 0.7481 - val_accuracy: 0.8000 - lr: 3.2000e-07 - 14s/epoch - 2s/step Epoch 74/75 Epoch 74: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3391 - accuracy: 0.9012 - val_loss: 0.7480 - val_accuracy: 0.8000 - lr: 6.4000e-08 - 14s/epoch - 2s/step Epoch 75/75 Epoch 75: val_loss did not improve from 0.74576 8/8 - 14s - loss: 0.3531 - accuracy: 0.8889 - val_loss: 0.7480 - val_accuracy: 0.8000 - lr: 6.4000e-08 - 14s/epoch - 2s/step
# Load the checkpointed weights with the best (lowest) validation loss
model.load_weights('weights.best.model.hdf5')
# Calculate classification accuracy on the testing set
score = model.evaluate(x_test, y_test)
score  # [loss, accuracy]
1/1 [==============================] - 0s 417ms/step - loss: 0.7566 - accuracy: 0.7419
[0.7566136121749878, 0.7419354915618896]
# Calculate classification accuracy on the training set
score = model.evaluate(x_train, y_train)
score  # [loss, accuracy]
8/8 [==============================] - 3s 392ms/step - loss: 0.3507 - accuracy: 0.9383
[0.35065749287605286, 0.9382715821266174]
# Calculate classification accuracy on the validation set
score = model.evaluate(x_valid, y_valid)
score  # [loss, accuracy]
1/1 [==============================] - 0s 446ms/step - loss: 0.7458 - accuracy: 0.8000
[0.7457600235939026, 0.800000011920929]
# Accuracy curves over epochs. The second curve is computed on the
# validation data passed to model.fit (x_valid/y_valid), so the legend
# labels it 'validation' rather than 'test'.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# Loss curves over epochs: training vs validation.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
# Persist the full model (architecture + weights + optimizer state)
model.save('model.h5')
model1 = load_model('model.h5')  # reload the saved model for prediction
# Model predictions for the testing dataset
y_test_predict = model1.predict(x_test)  # per-class probabilities, shape (n, 5)
1/1 [==============================] - 3s 3s/step
y_test_predict = np.argmax(y_test_predict,axis=1)  # probabilities -> class indices
# Display true labels and predictions
# Show 16 random test images; each title is "predicted (true)", drawn in
# blue when the prediction is correct and dark red when it is wrong.
fig = plt.figure(figsize=(18, 18))
for i, idx in enumerate(np.random.choice(x_test.shape[0], size=16, replace=False)):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(x_test[idx]))
    pred_idx = y_test_predict[idx]
    true_idx = np.argmax(y_test[idx])  # argmax undoes the one-hot encoding
    ax.set_title("{} ({})".format(names[pred_idx], names[true_idx]),
                 color=("#4876ff" if pred_idx == true_idx else "darkred"))